library(forecast)
## Registered S3 method overwritten by 'xts':
## method from
## as.zoo.xts zoo
## Registered S3 method overwritten by 'quantmod':
## method from
## as.zoo.data.frame zoo
library(dplyr)
##
## Attaching package: 'dplyr'
## The following objects are masked from 'package:stats':
##
## filter, lag
## The following objects are masked from 'package:base':
##
## intersect, setdiff, setequal, union
library(astsa)
##
## Attaching package: 'astsa'
## The following object is masked from 'package:forecast':
##
## gas
library(fGarch)
## Loading required package: timeDate
## Loading required package: timeSeries
## Loading required package: fBasics
##
## Attaching package: 'fBasics'
## The following object is masked from 'package:astsa':
##
## nyse
library(dynlm)
## Loading required package: zoo
##
## Attaching package: 'zoo'
## The following object is masked from 'package:timeSeries':
##
## time<-
## The following objects are masked from 'package:base':
##
## as.Date, as.Date.numeric
library(tseries)
library(xts)
##
## Attaching package: 'xts'
## The following objects are masked from 'package:dplyr':
##
## first, last
library(fpp)
## Loading required package: fma
##
## Attaching package: 'fma'
## The following objects are masked from 'package:astsa':
##
## chicken, sales
## Loading required package: expsmooth
## Loading required package: lmtest
##
## Attaching package: 'fpp'
## The following object is masked from 'package:astsa':
##
## oil
library(TSA)
## Registered S3 methods overwritten by 'TSA':
## method from
## fitted.Arima forecast
## plot.Arima forecast
##
## Attaching package: 'TSA'
## The following objects are masked from 'package:timeDate':
##
## kurtosis, skewness
## The following objects are masked from 'package:stats':
##
## acf, arima
## The following object is masked from 'package:utils':
##
## tar
# Read the CTOT panel and build a proper monthly Date index ----
data <- read.csv("CTOT-1980-2020.csv")
# "1980M01" -> "1980/01", then append "/01" so each period parses as a date
data$Time.Period <- gsub("M", "/", data$Time.Period)
data["date"] <- "/01"
data$Time.Period <- paste0(data$Time.Period, data$date)
data$Time.Period <- as.Date(data$Time.Period, "%Y/%m/%d")
# Sort chronologically, drop the helper column, reset row names
data <- data[order(data$Time.Period), ]
data <- subset(data, select = -c(date))
rownames(data) <- NULL
# Plot the three CNEPI series side by side
par(mfrow = c(1, 3))
ts(data$United.States, frequency = 12, start = c(1980, 1)) %>%
  plot(main = "United States: CNEPI")
ts(data$United.Kingdom, frequency = 12, start = c(1980, 1)) %>%
  plot(main = "United Kingdom: CNEPI")
ts(data$Canada, frequency = 12, start = c(1980, 1)) %>%
  plot(main = "Canada: CNEPI")
# Canada series as its own ts object for the analysis below
df_can <- ts(data$Canada, frequency = 12, start = c(1980, 1))
plot.ts(df_can)
plot(decompose(df_can))
There is a clear trend: the series decreases at first, then increases, then decreases slightly. The decomposition also reveals a seasonal component.
# Periodogram of the Canadian series: locate the dominant cycle ----
sp <- spectrum(df_can, log = "no", main = "Canada: Periodogram")
# Use the full component name: the original sp$fre only worked via $ partial
# matching of the spec object's $freq component
sp$freq[which.max(sp$spec)]
## [1] 0.02469136
1 / sp$freq[which.max(sp$spec)]
## [1] 40.5
max(sp$spec) # estimate of spectral density at predominant period
## [1] 11.47306
# 95% CI for the spectral density: 2 * max(spec) / chi-square(2) quantiles
U <- qchisq(.025, 2)
L <- qchisq(.975, 2)
2 * max(sp$spec) / L # lower bound for spectral density
## [1] 3.110174
2 * max(sp$spec) / U # upper bound for spectral density
## [1] 453.1615
The predominant frequency is near 0.025, so the period of the cycle is about 40.5 months. The estimated spectral density at the predominant frequency is 11.47, with a 95% confidence interval of [3.11, 453.16].
# Make the series stationary: log to stabilise variance, then a first
# difference to remove the trend
difflog_df_can <- df_can %>% log() %>% diff()
plot.ts(difflog_df_can, main = "Canada: Transformed CNEPI")
# Sample ACF/PACF of the transformed series
acf2(difflog_df_can)
## ACF PACF
## [1,] 0.20 0.20
## [2,] 0.14 0.10
## [3,] 0.05 0.00
## [4,] 0.12 0.10
## [5,] 0.01 -0.03
## [6,] 0.01 -0.02
## [7,] 0.02 0.03
## [8,] -0.04 -0.06
## [9,] -0.02 0.00
## [10,] 0.03 0.05
## [11,] 0.04 0.03
## [12,] -0.05 -0.07
## [13,] -0.10 -0.09
## [14,] -0.09 -0.06
## [15,] -0.05 -0.01
## [16,] -0.02 0.03
## [17,] -0.04 -0.02
## [18,] -0.02 0.00
## [19,] -0.01 0.02
## [20,] 0.08 0.08
## [21,] 0.04 0.01
## [22,] -0.03 -0.07
## [23,] -0.09 -0.08
## [24,] -0.11 -0.08
## [25,] -0.03 0.02
## [26,] -0.08 -0.06
## [27,] -0.07 -0.04
## [28,] -0.06 -0.01
## [29,] -0.11 -0.09
## [30,] -0.01 0.04
## [31,] 0.04 0.06
## [32,] 0.02 0.00
## [33,] 0.01 0.05
## [34,] 0.00 0.01
## [35,] 0.10 0.09
## [36,] -0.01 -0.07
## [37,] -0.01 -0.05
## [38,] 0.01 0.00
## [39,] 0.02 -0.01
## [40,] 0.05 0.04
## [41,] 0.05 0.00
## [42,] 0.05 0.00
## [43,] -0.03 -0.04
## [44,] 0.04 0.07
## [45,] 0.01 0.00
## [46,] 0.03 0.01
## [47,] -0.05 -0.04
## [48,] -0.01 0.01
#auto.arima(df_can)
If we ignore the seasonality, then based on the ACF and PACF we should try ARIMA(1,0,1) and ARIMA(1,0,2).
# Fit ARMA(1,1) (i.e. ARIMA(1,0,1)) to the log-differenced Canadian series
arima101 <- sarima(difflog_df_can, 1, 0, 1)
## initial value -6.453852
## iter 2 value -6.466551
## iter 3 value -6.472981
## iter 4 value -6.473308
## iter 5 value -6.477803
## iter 6 value -6.480102
## iter 7 value -6.481003
## iter 8 value -6.481915
## iter 9 value -6.482039
## iter 10 value -6.482082
## iter 11 value -6.482113
## iter 12 value -6.482180
## iter 13 value -6.482207
## iter 14 value -6.482211
## iter 15 value -6.482211
## iter 15 value -6.482211
## final value -6.482211
## converged
## initial value -6.481229
## iter 2 value -6.481236
## iter 3 value -6.481242
## iter 4 value -6.481251
## iter 5 value -6.481259
## iter 6 value -6.481262
## iter 7 value -6.481262
## iter 8 value -6.481263
## iter 9 value -6.481263
## iter 10 value -6.481263
## iter 11 value -6.481265
## iter 12 value -6.481265
## iter 13 value -6.481265
## iter 14 value -6.481265
## iter 15 value -6.481265
## iter 15 value -6.481265
## final value -6.481265
## converged
# Fit ARMA(1,2) (i.e. ARIMA(1,0,2)) to the log-differenced Canadian series
arima102 <- sarima(difflog_df_can, 1, 0, 2)
## initial value -6.453852
## iter 2 value -6.456471
## iter 3 value -6.478265
## iter 4 value -6.478721
## iter 5 value -6.478814
## iter 6 value -6.478827
## iter 7 value -6.479113
## iter 8 value -6.479783
## iter 9 value -6.479975
## iter 10 value -6.480491
## iter 11 value -6.481079
## iter 12 value -6.481672
## iter 13 value -6.481706
## iter 14 value -6.481813
## iter 15 value -6.481937
## iter 16 value -6.482024
## iter 17 value -6.482069
## iter 18 value -6.482106
## iter 19 value -6.482150
## iter 20 value -6.482154
## iter 21 value -6.482178
## iter 22 value -6.482185
## iter 23 value -6.482205
## iter 24 value -6.482208
## iter 25 value -6.482209
## iter 26 value -6.482209
## iter 27 value -6.482210
## iter 28 value -6.482211
## iter 29 value -6.482211
## iter 30 value -6.482211
## iter 31 value -6.482212
## iter 31 value -6.482212
## iter 31 value -6.482212
## final value -6.482212
## converged
## initial value -6.481240
## iter 2 value -6.481241
## iter 3 value -6.481251
## iter 4 value -6.481255
## iter 5 value -6.481264
## iter 6 value -6.481267
## iter 7 value -6.481268
## iter 8 value -6.481269
## iter 9 value -6.481270
## iter 10 value -6.481273
## iter 11 value -6.481273
## iter 12 value -6.481274
## iter 13 value -6.481274
## iter 14 value -6.481275
## iter 15 value -6.481276
## iter 16 value -6.481276
## iter 17 value -6.481276
## iter 18 value -6.481277
## iter 19 value -6.481277
## iter 20 value -6.481277
## iter 21 value -6.481277
## iter 21 value -6.481277
## iter 21 value -6.481277
## final value -6.481277
## converged
# Collect the information criteria of the two candidate models for comparison
arima_table <- data.frame(
  Model = c("ARIMA(1,0,1)", "ARIMA(1,0,2)"),
  AIC   = c(arima101$AIC, arima102$AIC),
  BIC   = c(arima101$BIC, arima102$BIC),
  AICc  = c(arima101$AICc, arima102$AICc)
)
arima_table
## Model AIC BIC AICc
## 1 ARIMA(1,0,1) -10.10802 -10.07330 -10.10792
## 2 ARIMA(1,0,2) -10.10389 -10.06048 -10.10371
Comparing the AIC, BIC, and AICc of these two models, ARIMA(1,0,1) is preferred on all three criteria.
# Cross-check with automatic order selection on the transformed series
# (seasonal terms disabled); per the output below it also returns
# ARIMA(1,0,1) with zero mean.
auto.arima(difflog_df_can, seasonal = FALSE)
## Series: difflog_df_can
## ARIMA(1,0,1) with zero mean
##
## Coefficients:
## ar1 ma1
## 0.6549 -0.4734
## s.e. 0.1283 0.1492
##
## sigma^2 estimated as 2.356e-06: log likelihood=2434.95
## AIC=-4863.89 AICc=-4863.84 BIC=-4851.37
We then use the auto.arima() function to confirm this choice; it also selects ARIMA(1,0,1).
# Allow seasonal terms on the raw (level) series; per the output below
# auto.arima still returns a non-seasonal ARIMA(1,1,1).
auto.arima(df_can, seasonal = TRUE)
## Series: df_can
## ARIMA(1,1,1)
##
## Coefficients:
## ar1 ma1
## 0.6569 -0.4740
## s.e. 0.1259 0.1465
##
## sigma^2 estimated as 0.02283: log likelihood=227.69
## AIC=-449.38 AICc=-449.33 BIC=-436.85
Since auto.arima() with seasonal = TRUE still selects a non-seasonal ARIMA(1,1,1), the seasonal component is not very significant; we can use an ARIMA model directly instead of a SARIMA model.
#since ARIMA(1,0,1) is the best model, we use this model to predict the future
# NOTE(review): the model was selected on diff(log(df_can)); fitting
# Arima(df_can, order = c(1,1,1)) reproduces the d = 1 differencing but omits
# the log transform -- consider lambda = 0 if the log scale was intended.
# TODO confirm.
fit_can = Arima(df_can, order=c(1,1,1))
# 48-step-ahead (4-year) monthly forecasts from the fitted model
pred_can=forecast(fit_can, h=48)
pred_can # The last 2 columns are 95% prediction intervals lower bound and upper bound
## Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
## Mar 2020 98.22508 98.03144 98.41871 97.92894 98.52122
## Apr 2020 98.16358 97.86366 98.46350 97.70489 98.62227
## May 2020 98.12319 97.73126 98.51511 97.52379 98.72258
## Jun 2020 98.09665 97.62211 98.57120 97.37090 98.82241
## Jul 2020 98.07922 97.52945 98.62900 97.23841 98.92003
## Aug 2020 98.06777 97.44889 98.68666 97.12127 99.01427
## Sep 2020 98.06025 97.37742 98.74308 97.01596 99.10455
## Oct 2020 98.05531 97.31292 98.79771 96.91992 99.19071
## Nov 2020 98.05207 97.25387 98.85027 96.83132 99.27282
## Dec 2020 98.04994 97.19916 98.90072 96.74878 99.35110
## Jan 2021 98.04854 97.14798 98.94910 96.67125 99.42583
## Feb 2021 98.04762 97.09973 98.99551 96.59794 99.49730
## Mar 2021 98.04701 97.05394 99.04009 96.52824 99.56579
## Apr 2021 98.04662 97.01026 99.08297 96.46165 99.63159
## May 2021 98.04636 96.96842 99.12430 96.39779 99.69492
## Jun 2021 98.04619 96.92818 99.16419 96.33635 99.75603
## Jul 2021 98.04607 96.88938 99.20277 96.27706 99.81509
## Aug 2021 98.04600 96.85185 99.24015 96.21971 99.87229
## Sep 2021 98.04595 96.81548 99.27642 96.16411 99.92779
## Oct 2021 98.04592 96.78017 99.31167 96.11012 99.98172
## Nov 2021 98.04590 96.74582 99.34598 96.05760 100.03420
## Dec 2021 98.04589 96.71236 99.37941 96.00644 100.08533
## Jan 2022 98.04588 96.67972 99.41203 95.95653 100.13523
## Feb 2022 98.04587 96.64785 99.44389 95.90778 100.18396
## Mar 2022 98.04587 96.61669 99.47504 95.86013 100.23160
## Apr 2022 98.04586 96.58619 99.50553 95.81349 100.27824
## May 2022 98.04586 96.55632 99.53540 95.76781 100.32392
## Jun 2022 98.04586 96.52704 99.56468 95.72302 100.36870
## Jul 2022 98.04586 96.49831 99.59341 95.67909 100.41263
## Aug 2022 98.04586 96.47011 99.62161 95.63595 100.45577
## Sep 2022 98.04586 96.44240 99.64932 95.59357 100.49814
## Oct 2022 98.04586 96.41516 99.67656 95.55192 100.53980
## Nov 2022 98.04586 96.38837 99.70335 95.51095 100.58077
## Dec 2022 98.04586 96.36200 99.72971 95.47062 100.62109
## Jan 2023 98.04586 96.33605 99.75567 95.43093 100.66079
## Feb 2023 98.04586 96.31048 99.78124 95.39182 100.69990
## Mar 2023 98.04586 96.28528 99.80644 95.35328 100.73843
## Apr 2023 98.04586 96.26044 99.83128 95.31529 100.77643
## May 2023 98.04586 96.23593 99.85578 95.27782 100.81390
## Jun 2023 98.04586 96.21176 99.87996 95.24085 100.85087
## Jul 2023 98.04586 96.18790 99.90382 95.20436 100.88736
## Aug 2023 98.04586 96.16434 99.92737 95.16833 100.92339
## Sep 2023 98.04586 96.14108 99.95064 95.13275 100.95897
## Oct 2023 98.04586 96.11809 99.97363 95.09759 100.99412
## Nov 2023 98.04586 96.09538 99.99634 95.06286 101.02886
## Dec 2023 98.04586 96.07292 100.01879 95.02852 101.06320
## Jan 2024 98.04586 96.05072 100.04099 94.99456 101.09715
## Feb 2024 98.04586 96.02877 100.06295 94.96099 101.13073
autoplot(pred_can) # plot the forecasts with their 80%/95% prediction intervals
#use the best model arima(1,0,1)
# ACF/PACF of the ARIMA(1,0,1) residuals, first 20 lags
acf2(resid(arima101$fit), 20)
## ACF PACF
## [1,] 0.00 0.00
## [2,] 0.01 0.01
## [3,] -0.05 -0.05
## [4,] 0.08 0.08
## [5,] -0.03 -0.03
## [6,] -0.02 -0.02
## [7,] 0.02 0.03
## [8,] -0.05 -0.06
## [9,] -0.02 -0.02
## [10,] 0.04 0.05
## [11,] 0.07 0.06
## [12,] -0.04 -0.03
## [13,] -0.08 -0.08
## [14,] -0.07 -0.07
## [15,] -0.02 -0.03
## [16,] 0.01 0.02
## [17,] -0.03 -0.03
## [18,] -0.01 -0.01
## [19,] -0.01 0.00
## [20,] 0.09 0.09
# ACF/PACF of the squared residuals: remaining autocorrelation here motivates
# the GARCH models fitted below
acf2(resid(arima101$fit)^2, 20)
## ACF PACF
## [1,] 0.17 0.17
## [2,] 0.14 0.11
## [3,] 0.08 0.04
## [4,] 0.09 0.06
## [5,] 0.14 0.11
## [6,] 0.04 -0.02
## [7,] 0.11 0.08
## [8,] 0.04 -0.01
## [9,] 0.07 0.03
## [10,] 0.10 0.06
## [11,] 0.05 0.01
## [12,] 0.10 0.06
## [13,] 0.13 0.09
## [14,] 0.07 -0.01
## [15,] 0.00 -0.06
## [16,] 0.04 0.02
## [17,] 0.06 0.02
## [18,] 0.05 0.01
## [19,] 0.05 0.02
## [20,] 0.10 0.07
Based on the ACF and PACF of the residuals, they resemble white noise. Based on the ACF and PACF of the squared residuals, there is evidence of conditional heteroskedasticity, so we should try GARCH(1,1) and GARCH(1,2).
# ARMA(1,1) mean equation with a GARCH(1,1) conditional-variance equation
can.garch11 <- garchFit(~ arma(1, 1) + garch(1, 1), difflog_df_can)
##
## Series Initialization:
## ARMA Model: arma
## Formula Mean: ~ arma(1, 1)
## GARCH Model: garch
## Formula Variance: ~ garch(1, 1)
## ARMA Order: 1 1
## Max ARMA Order: 1
## GARCH Order: 1 1
## Max GARCH Order: 1
## Maximum Order: 1
## Conditional Dist: norm
## h.start: 2
## llh.start: 1
## Length of Series: 481
## Recursion Init: mci
## Series Scale: 0.001575839
##
## Parameter Initialization:
## Initial Parameters: $params
## Limits of Transformations: $U, $V
## Which Parameters are Fixed? $includes
## Parameter Matrix:
## U V params includes
## mu -0.11701707 0.1170171 -0.01720972 TRUE
## ar1 -0.99999999 1.0000000 0.65255913 TRUE
## ma1 -0.99999999 1.0000000 -0.47076571 TRUE
## omega 0.00000100 100.0000000 0.10000000 TRUE
## alpha1 0.00000001 1.0000000 0.10000000 TRUE
## gamma1 -0.99999999 1.0000000 0.10000000 FALSE
## beta1 0.00000001 1.0000000 0.80000000 TRUE
## delta 0.00000000 2.0000000 2.00000000 FALSE
## skew 0.10000000 10.0000000 1.00000000 FALSE
## shape 1.00000000 10.0000000 4.00000000 FALSE
## Index List of Parameters to be Optimized:
## mu ar1 ma1 omega alpha1 beta1
## 1 2 3 4 5 7
## Persistence: 0.9
##
##
## --- START OF TRACE ---
## Selected Algorithm: nlminb
##
## R coded nlminb Solver:
##
## 0: 652.57616: -0.0172097 0.652559 -0.470766 0.100000 0.100000 0.800000
## 1: 652.32772: -0.0172094 0.650801 -0.472294 0.0946467 0.0993716 0.798227
## 2: 652.11755: -0.0172090 0.648497 -0.474297 0.0937717 0.103216 0.801799
## 3: 651.81701: -0.0172081 0.643946 -0.478280 0.0832517 0.104893 0.802451
## 4: 651.03756: -0.0172055 0.636373 -0.485044 0.0714293 0.114592 0.818730
## 5: 650.48260: -0.0172022 0.636953 -0.484843 0.0537044 0.110066 0.835066
## 6: 650.04977: -0.0171989 0.635689 -0.486230 0.0468334 0.100157 0.856349
## 7: 649.99430: -0.0171940 0.635093 -0.487421 0.0350331 0.0877598 0.873873
## 8: 649.88045: -0.0171911 0.638559 -0.484898 0.0334912 0.0927242 0.879164
## 9: 649.79894: -0.0171848 0.633304 -0.490278 0.0353481 0.0905375 0.876269
## 10: 649.79828: -0.0171847 0.633389 -0.490215 0.0348223 0.0903751 0.876049
## 11: 649.79376: -0.0171838 0.633663 -0.490082 0.0347808 0.0907415 0.876405
## 12: 649.79177: -0.0171814 0.634331 -0.489777 0.0339481 0.0910082 0.876728
## 13: 649.78992: -0.0171682 0.636025 -0.489737 0.0338104 0.0907820 0.877988
## 14: 649.78706: -0.0171509 0.637080 -0.490603 0.0334753 0.0896941 0.878657
## 15: 649.78593: -0.0171309 0.637367 -0.492193 0.0331487 0.0899850 0.878945
## 16: 649.78592: -0.0171307 0.637396 -0.492180 0.0331606 0.0899849 0.878968
## 17: 649.78590: -0.0171304 0.637416 -0.492190 0.0331456 0.0899714 0.878959
## 18: 649.78588: -0.0171295 0.637460 -0.492209 0.0331568 0.0899705 0.878978
## 19: 649.78146: -0.0167818 0.649598 -0.505917 0.0336529 0.0888461 0.879244
## 20: 649.77778: -0.0163937 0.657413 -0.513223 0.0334944 0.0908955 0.878008
## 21: 649.75666: -0.0147628 0.652140 -0.505846 0.0324134 0.0895373 0.880861
## 22: 649.73293: -0.0131290 0.649367 -0.504686 0.0340228 0.0907440 0.877271
## 23: 649.71279: -0.00944426 0.669117 -0.530055 0.0333226 0.0899973 0.878668
## 24: 649.71255: -0.00923274 0.668181 -0.528771 0.0338174 0.0902406 0.878223
## 25: 649.71190: -0.00916240 0.668585 -0.528115 0.0337940 0.0899286 0.878318
## 26: 649.71179: -0.00923116 0.667460 -0.527102 0.0337796 0.0899892 0.878229
## 27: 649.71176: -0.00929192 0.666126 -0.525528 0.0337988 0.0899880 0.878225
## 28: 649.71176: -0.00926786 0.666307 -0.525764 0.0337945 0.0899943 0.878219
## 29: 649.71176: -0.00926901 0.666311 -0.525766 0.0337931 0.0899927 0.878222
##
## Final Estimate of the Negative LLH:
## LLH: -2454.166 norm LLH: -5.102215
## mu ar1 ma1 omega alpha1
## -1.460646e-05 6.663108e-01 -5.257660e-01 8.391735e-08 8.999265e-02
## beta1
## 8.782219e-01
##
## R-optimhess Difference Approximated Hessian Matrix:
## mu ar1 ma1 omega
## mu -1.140214e+09 1.012418e+04 -3.832014e+04 8.627638e+10
## ar1 1.012418e+04 -7.858946e+02 -6.548942e+02 6.069302e+07
## ma1 -3.832014e+04 -6.548942e+02 -5.748657e+02 4.612099e+07
## omega 8.627638e+10 6.069302e+07 4.612099e+07 -4.507228e+15
## alpha1 6.797363e+03 -2.010849e+01 -2.401507e+01 -6.546901e+09
## beta1 2.021233e+04 1.332437e+02 1.053037e+02 -8.488471e+09
## alpha1 beta1
## mu 6.797363e+03 2.021233e+04
## ar1 -2.010849e+01 1.332437e+02
## ma1 -2.401507e+01 1.053037e+02
## omega -6.546901e+09 -8.488471e+09
## alpha1 -1.292604e+04 -1.438395e+04
## beta1 -1.438395e+04 -1.812785e+04
## attr(,"time")
## Time difference of 0.01559806 secs
##
## --- END OF TRACE ---
##
##
## Time to Estimate Parameters:
## Time difference of 0.06536603 secs
# Coefficient estimates, standardised-residual tests and information criteria
# for the ARMA(1,1)+GARCH(1,1) fit
summary(can.garch11)
##
## Title:
## GARCH Modelling
##
## Call:
## garchFit(formula = ~arma(1, 1) + garch(1, 1), data = difflog_df_can)
##
## Mean and Variance Equation:
## data ~ arma(1, 1) + garch(1, 1)
## <environment: 0x7fd9ca78b068>
## [data = difflog_df_can]
##
## Conditional Distribution:
## norm
##
## Coefficient(s):
## mu ar1 ma1 omega alpha1
## -1.4606e-05 6.6631e-01 -5.2577e-01 8.3917e-08 8.9993e-02
## beta1
## 8.7822e-01
##
## Std. Errors:
## based on Hessian
##
## Error Analysis:
## Estimate Std. Error t value Pr(>|t|)
## mu -1.461e-05 3.082e-05 -0.474 0.635555
## ar1 6.663e-01 1.639e-01 4.065 4.8e-05 ***
## ma1 -5.258e-01 1.919e-01 -2.740 0.006145 **
## omega 8.392e-08 4.461e-08 1.881 0.059929 .
## alpha1 8.999e-02 2.649e-02 3.397 0.000681 ***
## beta1 8.782e-01 3.349e-02 26.227 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Log Likelihood:
## 2454.166 normalized: 5.102215
##
## Description:
## Wed Apr 22 22:50:27 2020 by user:
##
##
## Standardised Residuals Tests:
## Statistic p-Value
## Jarque-Bera Test R Chi^2 10.60525 0.004978501
## Shapiro-Wilk Test R W 0.9910803 0.005340228
## Ljung-Box Test R Q(10) 5.807305 0.8311844
## Ljung-Box Test R Q(15) 13.96156 0.5284465
## Ljung-Box Test R Q(20) 19.81849 0.4693346
## Ljung-Box Test R^2 Q(10) 10.47417 0.399922
## Ljung-Box Test R^2 Q(15) 16.66719 0.3391457
## Ljung-Box Test R^2 Q(20) 19.92415 0.4626832
## LM Arch Test R TR^2 11.92809 0.4514717
##
## Information Criterion Statistics:
## AIC BIC SIC HQIC
## -10.17948 -10.12739 -10.17979 -10.15901
# ARMA(1,1) mean equation with a GARCH(1,2) conditional-variance equation
can.garch12 <- garchFit(~ arma(1, 1) + garch(1, 2), difflog_df_can)
##
## Series Initialization:
## ARMA Model: arma
## Formula Mean: ~ arma(1, 1)
## GARCH Model: garch
## Formula Variance: ~ garch(1, 2)
## ARMA Order: 1 1
## Max ARMA Order: 1
## GARCH Order: 1 2
## Max GARCH Order: 2
## Maximum Order: 2
## Conditional Dist: norm
## h.start: 3
## llh.start: 1
## Length of Series: 481
## Recursion Init: mci
## Series Scale: 0.001575839
##
## Parameter Initialization:
## Initial Parameters: $params
## Limits of Transformations: $U, $V
## Which Parameters are Fixed? $includes
## Parameter Matrix:
## U V params includes
## mu -0.11701707 0.1170171 -0.01720972 TRUE
## ar1 -0.99999999 1.0000000 0.65255913 TRUE
## ma1 -0.99999999 1.0000000 -0.47076571 TRUE
## omega 0.00000100 100.0000000 0.10000000 TRUE
## alpha1 0.00000001 1.0000000 0.10000000 TRUE
## gamma1 -0.99999999 1.0000000 0.10000000 FALSE
## beta1 0.00000001 1.0000000 0.40000000 TRUE
## beta2 0.00000001 1.0000000 0.40000000 TRUE
## delta 0.00000000 2.0000000 2.00000000 FALSE
## skew 0.10000000 10.0000000 1.00000000 FALSE
## shape 1.00000000 10.0000000 4.00000000 FALSE
## Index List of Parameters to be Optimized:
## mu ar1 ma1 omega alpha1 beta1 beta2
## 1 2 3 4 5 7 8
## Persistence: 0.9
##
##
## --- START OF TRACE ---
## Selected Algorithm: nlminb
##
## R coded nlminb Solver:
##
## 0: 651.93548: -0.0172097 0.652559 -0.470766 0.100000 0.100000 0.400000 0.400000
## 1: 651.57613: -0.0172096 0.651435 -0.471789 0.0954006 0.0996099 0.397563 0.397586
## 2: 651.19302: -0.0172089 0.647217 -0.475638 0.0913752 0.108572 0.400873 0.401053
## 3: 650.27622: -0.0172074 0.639392 -0.482956 0.0717104 0.118769 0.399457 0.400068
## 4: 649.65203: -0.0172042 0.636831 -0.486618 0.0562629 0.127350 0.410200 0.412747
## 5: 649.60663: -0.0171991 0.638849 -0.487145 0.0364252 0.121151 0.417942 0.423332
## 6: 649.52073: -0.0171936 0.632418 -0.495263 0.0402722 0.118159 0.419831 0.427493
## 7: 649.41579: -0.0171854 0.636136 -0.495414 0.0430367 0.125319 0.412625 0.422302
## 8: 649.41564: -0.0171825 0.638211 -0.494675 0.0433132 0.125232 0.412511 0.423145
## 9: 649.41194: -0.0171804 0.638735 -0.495009 0.0428427 0.124553 0.411967 0.423322
## 10: 649.40912: -0.0171777 0.639123 -0.495681 0.0431270 0.124174 0.411786 0.424041
## 11: 649.40489: -0.0171579 0.642367 -0.499871 0.0432713 0.122477 0.409129 0.427525
## 12: 649.39939: -0.0171364 0.646292 -0.503558 0.0431397 0.122962 0.406129 0.430899
## 13: 649.38045: -0.0170218 0.662804 -0.522000 0.0431618 0.125670 0.387914 0.445981
## 14: 649.37406: -0.0168666 0.666051 -0.531328 0.0465796 0.130436 0.362698 0.464185
## 15: 649.26730: -0.0151699 0.670140 -0.529807 0.0491674 0.146543 0.115244 0.697918
## 16: 649.26255: -0.0150194 0.671973 -0.532916 0.0481127 0.145786 0.104546 0.703380
## 17: 649.20728: -0.0150148 0.672763 -0.533449 0.0489392 0.145525 0.111984 0.698487
## 18: 649.18877: -0.0149987 0.674248 -0.535820 0.0486952 0.142465 0.125916 0.687947
## 19: 649.18016: -0.0146598 0.687542 -0.545883 0.0504779 0.140772 0.134440 0.677870
## 20: 649.15846: -0.0142986 0.697144 -0.561217 0.0483826 0.139882 0.137965 0.677863
## 21: 649.14927: -0.0131289 0.707404 -0.574172 0.0506923 0.138672 0.126031 0.688334
## 22: 649.14578: -0.0119439 0.713758 -0.582433 0.0495838 0.137374 0.135215 0.679674
## 23: 649.14027: -0.0107884 0.701502 -0.572350 0.0500181 0.137303 0.151538 0.663712
## 24: 649.13496: -0.0102705 0.711674 -0.580301 0.0494938 0.138996 0.145276 0.670106
## 25: 649.13379: -0.0104874 0.708682 -0.577148 0.0496690 0.138569 0.145429 0.669628
## 26: 649.13378: -0.0104231 0.708969 -0.577552 0.0496641 0.138533 0.145732 0.669366
## 27: 649.13378: -0.0104264 0.708935 -0.577497 0.0496635 0.138541 0.145721 0.669373
## 28: 649.13378: -0.0104261 0.708935 -0.577495 0.0496636 0.138541 0.145725 0.669368
##
## Final Estimate of the Negative LLH:
## LLH: -2454.743 norm LLH: -5.103417
## mu ar1 ma1 omega alpha1
## -1.642982e-05 7.089345e-01 -5.774954e-01 1.233281e-07 1.385409e-01
## beta1 beta2
## 1.457252e-01 6.693681e-01
##
## R-optimhess Difference Approximated Hessian Matrix:
## mu ar1 ma1 omega
## mu -1.450835e+09 4.053677e+04 -4.142039e+04 9.345235e+10
## ar1 4.053677e+04 -8.717570e+02 -7.085125e+02 3.464039e+07
## ma1 -4.142039e+04 -7.085125e+02 -6.186625e+02 3.403420e+07
## omega 9.345235e+10 3.464039e+07 3.403420e+07 -1.968184e+15
## alpha1 9.602098e+03 5.200135e+00 7.526521e+00 -2.857636e+09
## beta1 8.119125e+04 8.941819e+01 9.739995e+01 -3.758088e+09
## beta2 3.377754e+04 1.106499e+02 1.111953e+02 -3.759304e+09
## alpha1 beta1 beta2
## mu 9.602098e+03 8.119125e+04 3.377754e+04
## ar1 5.200135e+00 8.941819e+01 1.106499e+02
## ma1 7.526521e+00 9.739995e+01 1.111953e+02
## omega -2.857636e+09 -3.758088e+09 -3.759304e+09
## alpha1 -5.597660e+03 -6.281493e+03 -6.277315e+03
## beta1 -6.281493e+03 -8.130694e+03 -8.144135e+03
## beta2 -6.277315e+03 -8.144135e+03 -8.179943e+03
## attr(,"time")
## Time difference of 0.01715589 secs
##
## --- END OF TRACE ---
##
##
## Time to Estimate Parameters:
## Time difference of 0.05963302 secs
# Coefficient estimates, standardised-residual tests and information criteria
# for the ARMA(1,1)+GARCH(1,2) fit
summary(can.garch12)
##
## Title:
## GARCH Modelling
##
## Call:
## garchFit(formula = ~arma(1, 1) + garch(1, 2), data = difflog_df_can)
##
## Mean and Variance Equation:
## data ~ arma(1, 1) + garch(1, 2)
## <environment: 0x7fd9ccc7d118>
## [data = difflog_df_can]
##
## Conditional Distribution:
## norm
##
## Coefficient(s):
## mu ar1 ma1 omega alpha1
## -1.6430e-05 7.0893e-01 -5.7750e-01 1.2333e-07 1.3854e-01
## beta1 beta2
## 1.4573e-01 6.6937e-01
##
## Std. Errors:
## based on Hessian
##
## Error Analysis:
## Estimate Std. Error t value Pr(>|t|)
## mu -1.643e-05 2.854e-05 -0.576 0.564855
## ar1 7.089e-01 1.362e-01 5.207 1.92e-07 ***
## ma1 -5.775e-01 1.617e-01 -3.571 0.000355 ***
## omega 1.233e-07 6.693e-08 1.843 0.065364 .
## alpha1 1.385e-01 3.749e-02 3.696 0.000219 ***
## beta1 1.457e-01 2.348e-01 0.621 0.534882
## beta2 6.694e-01 2.241e-01 2.987 0.002819 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Log Likelihood:
## 2454.743 normalized: 5.103417
##
## Description:
## Wed Apr 22 22:50:27 2020 by user:
##
##
## Standardised Residuals Tests:
## Statistic p-Value
## Jarque-Bera Test R Chi^2 12.30608 0.002127004
## Shapiro-Wilk Test R W 0.9910229 0.005108671
## Ljung-Box Test R Q(10) 5.688111 0.8407509
## Ljung-Box Test R Q(15) 13.06553 0.5972364
## Ljung-Box Test R Q(20) 18.8714 0.5301987
## Ljung-Box Test R^2 Q(10) 9.76461 0.461382
## Ljung-Box Test R^2 Q(15) 15.6774 0.4038094
## Ljung-Box Test R^2 Q(20) 18.26272 0.5701064
## LM Arch Test R TR^2 11.80473 0.4614865
##
## Information Criterion Statistics:
## AIC BIC SIC HQIC
## -10.17773 -10.11696 -10.17814 -10.15384
Comparing the AIC, BIC, SIC, and HQIC values of the two fits, we should choose GARCH(1,1).
# U.K. series as a monthly ts object, then inspect its level and decomposition
df_uk <- data$United.Kingdom %>% ts(frequency = 12, start = c(1980, 1))
df_uk %>% plot.ts()
df_uk %>% decompose() %>% plot()
There is an overall increasing trend. The decomposition also reveals a seasonal component.
# Periodogram of the U.K. series: locate the dominant cycle ----
sp <- spectrum(df_uk, log = "no", main = "U.K.: Periodogram")
# Use the full component name: the original sp$fre only worked via $ partial
# matching of the spec object's $freq component
sp$freq[which.max(sp$spec)]
## [1] 0.09876543
1 / sp$freq[which.max(sp$spec)]
## [1] 10.125
max(sp$spec) # estimate of spectral density at predominant period
## [1] 0.03913253
# 95% CI for the spectral density: 2 * max(spec) / chi-square(2) quantiles
U <- qchisq(.025, 2)
L <- qchisq(.975, 2)
2 * max(sp$spec) / L # lower bound for spectral density
## [1] 0.01060824
2 * max(sp$spec) / U # upper bound for spectral density
## [1] 1.545652
The predominant frequency is near 0.1, so the period of the cycle is about 10 months. The estimated spectral density at the predominant frequency is 0.04, with a 95% confidence interval of [0.01, 1.55].
# Make the U.K. series stationary: log to stabilise variance, then a first
# difference to remove the trend
difflog_df_uk <- df_uk %>% log() %>% diff()
plot.ts(difflog_df_uk, main = "U.K.: Transformed CNEPI")
# Sample ACF/PACF of the transformed series
acf2(difflog_df_uk)
## ACF PACF
## [1,] 0.18 0.18
## [2,] -0.06 -0.10
## [3,] -0.07 -0.05
## [4,] -0.16 -0.15
## [5,] -0.07 -0.03
## [6,] -0.10 -0.12
## [7,] -0.04 -0.03
## [8,] 0.00 -0.04
## [9,] -0.04 -0.07
## [10,] 0.04 0.02
## [11,] 0.15 0.11
## [12,] 0.07 0.01
## [13,] -0.04 -0.06
## [14,] -0.13 -0.10
## [15,] -0.15 -0.10
## [16,] -0.11 -0.09
## [17,] 0.04 0.06
## [18,] 0.05 -0.01
## [19,] 0.06 0.01
## [20,] 0.03 -0.02
## [21,] -0.05 -0.08
## [22,] 0.03 0.01
## [23,] 0.05 0.02
## [24,] 0.04 0.04
## [25,] 0.01 0.02
## [26,] -0.04 0.01
## [27,] -0.01 0.02
## [28,] -0.01 -0.03
## [29,] 0.06 0.05
## [30,] 0.10 0.05
## [31,] -0.02 -0.04
## [32,] -0.10 -0.06
## [33,] -0.04 0.02
## [34,] -0.02 -0.02
## [35,] 0.07 0.07
## [36,] 0.05 0.01
## [37,] 0.00 0.00
## [38,] 0.01 0.02
## [39,] -0.04 -0.01
## [40,] -0.08 -0.08
## [41,] -0.02 -0.02
## [42,] -0.01 -0.01
## [43,] -0.04 -0.02
## [44,] -0.10 -0.10
## [45,] -0.09 -0.07
## [46,] 0.01 -0.05
## [47,] 0.15 0.08
## [48,] 0.12 0.04
If we ignore the seasonality, then based on the ACF and PACF we should try ARIMA(1,0,1) and ARIMA(2,0,1).
# Fit ARMA(1,1) (i.e. ARIMA(1,0,1)) to the log-differenced U.K. series
arima101 <- sarima(difflog_df_uk, 1, 0, 1)
## initial value -8.111849
## iter 2 value -8.117678
## iter 3 value -8.130135
## iter 4 value -8.130242
## iter 5 value -8.133012
## iter 6 value -8.133473
## iter 7 value -8.133519
## iter 8 value -8.133523
## iter 9 value -8.133525
## iter 10 value -8.133529
## iter 11 value -8.133532
## iter 12 value -8.133534
## iter 13 value -8.133534
## iter 13 value -8.133534
## iter 13 value -8.133534
## final value -8.133534
## converged
## initial value -8.134187
## iter 2 value -8.134188
## iter 3 value -8.134208
## iter 4 value -8.134210
## iter 5 value -8.134219
## iter 6 value -8.134220
## iter 6 value -8.134220
## iter 6 value -8.134220
## final value -8.134220
## converged
# Fit ARMA(2,1) (i.e. ARIMA(2,0,1)) to the log-differenced U.K. series
arima201 <- sarima(difflog_df_uk, 2, 0, 1)
## initial value -8.111850
## iter 2 value -8.119295
## iter 3 value -8.132707
## iter 4 value -8.132818
## iter 5 value -8.132826
## iter 6 value -8.132894
## iter 7 value -8.133075
## iter 8 value -8.134116
## iter 9 value -8.134329
## iter 10 value -8.134862
## iter 11 value -8.139129
## iter 12 value -8.140789
## iter 13 value -8.141249
## iter 14 value -8.142595
## iter 15 value -8.143181
## iter 16 value -8.144508
## iter 17 value -8.144663
## iter 18 value -8.144890
## iter 19 value -8.144900
## iter 20 value -8.144901
## iter 21 value -8.144901
## iter 22 value -8.144902
## iter 22 value -8.144902
## iter 22 value -8.144902
## final value -8.144902
## converged
## initial value -8.148527
## iter 2 value -8.148557
## iter 3 value -8.148682
## iter 4 value -8.148807
## iter 5 value -8.148920
## iter 6 value -8.149048
## iter 7 value -8.149065
## iter 8 value -8.149065
## iter 8 value -8.149065
## iter 8 value -8.149065
## final value -8.149065
## converged
# Collect the information criteria of the two U.K. candidates for comparison
arima_table <- data.frame(
  Model = c("ARIMA(1,0,1)", "ARIMA(2,0,1)"),
  AIC   = c(arima101$AIC, arima201$AIC),
  BIC   = c(arima101$BIC, arima201$BIC),
  AICc  = c(arima101$AICc, arima201$AICc)
)
arima_table
## Model AIC BIC AICc
## 1 ARIMA(1,0,1) -13.41393 -13.37920 -13.41383
## 2 ARIMA(2,0,1) -13.43946 -13.39606 -13.43929
Comparing the AIC, BIC, and AICc of these two models, ARIMA(2,0,1) is preferred.
# Cross-check with automatic order selection on the transformed U.K. series
# (seasonal terms disabled); per the output below it selects ARIMA(1,0,2)
# with zero mean.
auto.arima(difflog_df_uk, seasonal = FALSE)
## Series: difflog_df_uk
## ARIMA(1,0,2) with zero mean
##
## Coefficients:
## ar1 ma1 ma2
## 0.8453 -0.6803 -0.2531
## s.e. 0.1692 0.1651 0.0488
##
## sigma^2 estimated as 8.415e-08: log likelihood=3236.73
## AIC=-6465.45 AICc=-6465.37 BIC=-6448.75
We then run auto.arima(), which instead selects ARIMA(1,0,2), so we fit that model as a third candidate.
# Fit ARMA(1,2) (i.e. ARIMA(1,0,2)), the order suggested by auto.arima
arima102 <- sarima(difflog_df_uk, 1, 0, 2)
## initial value -8.111849
## iter 2 value -8.119608
## iter 3 value -8.132900
## iter 4 value -8.133189
## iter 5 value -8.133214
## iter 6 value -8.133219
## iter 7 value -8.133373
## iter 8 value -8.134015
## iter 9 value -8.134537
## iter 10 value -8.134698
## iter 11 value -8.134835
## iter 12 value -8.135383
## iter 13 value -8.135981
## iter 14 value -8.136711
## iter 15 value -8.138256
## iter 16 value -8.138813
## iter 17 value -8.140071
## iter 18 value -8.141055
## iter 19 value -8.142306
## iter 20 value -8.144232
## iter 21 value -8.145399
## iter 22 value -8.145782
## iter 23 value -8.145987
## iter 24 value -8.146162
## iter 25 value -8.146164
## iter 26 value -8.146168
## iter 27 value -8.146168
## iter 28 value -8.146169
## iter 29 value -8.146170
## iter 29 value -8.146170
## iter 29 value -8.146170
## final value -8.146170
## converged
## initial value -8.148174
## iter 2 value -8.148184
## iter 3 value -8.148200
## iter 4 value -8.148208
## iter 5 value -8.148242
## iter 6 value -8.148282
## iter 7 value -8.148337
## iter 8 value -8.148380
## iter 9 value -8.148394
## iter 10 value -8.148409
## iter 11 value -8.148416
## iter 12 value -8.148418
## iter 13 value -8.148428
## iter 14 value -8.148428
## iter 15 value -8.148428
## iter 16 value -8.148428
## iter 17 value -8.148429
## iter 18 value -8.148430
## iter 19 value -8.148431
## iter 19 value -8.148431
## final value -8.148431
## converged
# Collect the information criteria of all three U.K. candidates for comparison
arima_table <- data.frame(
  Model = c("ARIMA(1,0,1)", "ARIMA(2,0,1)", "ARIMA(1,0,2)"),
  AIC   = c(arima101$AIC, arima201$AIC, arima102$AIC),
  BIC   = c(arima101$BIC, arima201$BIC, arima102$BIC),
  AICc  = c(arima101$AICc, arima201$AICc, arima102$AICc)
)
arima_table
## Model AIC BIC AICc
## 1 ARIMA(1,0,1) -13.41393 -13.37920 -13.41383
## 2 ARIMA(2,0,1) -13.43946 -13.39606 -13.43929
## 3 ARIMA(1,0,2) -13.43820 -13.39479 -13.43802
The comparison shows that ARIMA(2,0,1) is the best of the three models.
# Allow seasonal terms on the raw (level) U.K. series; per the output below
# auto.arima still returns a non-seasonal ARIMA(1,1,2).
auto.arima(df_uk, seasonal = TRUE)
## Series: df_uk
## ARIMA(1,1,2)
##
## Coefficients:
## ar1 ma1 ma2
## 0.8449 -0.6801 -0.2531
## s.e. 0.1596 0.1562 0.0485
##
## sigma^2 estimated as 0.0008594: log likelihood=1022.48
## AIC=-2036.97 AICc=-2036.88 BIC=-2020.26
Since auto.arima() with seasonal = TRUE still selects a non-seasonal ARIMA(1,1,2), the seasonal component is not very significant; we can use an ARIMA model directly instead of a SARIMA model.
# ARIMA(2,0,1) on the differenced series corresponds to ARIMA(2,1,1) on the
# level series, so refit on df_uk and forecast 48 months ahead.
fit_uk = Arima(df_uk, order=c(2,1,1))
pred_uk=forecast(fit_uk, h=48)
pred_uk # The last 2 columns are 95% prediction intervals lower bound and upper bound
## Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
## Mar 2020 99.96326 99.92572 100.0008 99.90584 100.0207
## Apr 2020 99.95921 99.90160 100.0168 99.87110 100.0473
## May 2020 99.95553 99.88498 100.0261 99.84763 100.0634
## Jun 2020 99.95268 99.87346 100.0319 99.83152 100.0738
## Jul 2020 99.95063 99.86522 100.0360 99.82001 100.0813
## Aug 2020 99.94919 99.85907 100.0393 99.81137 100.0870
## Sep 2020 99.94819 99.85428 100.0421 99.80456 100.0918
## Oct 2020 99.94751 99.85036 100.0447 99.79894 100.0961
## Nov 2020 99.94705 99.84704 100.0471 99.79410 100.1000
## Dec 2020 99.94673 99.84411 100.0493 99.78979 100.1037
## Jan 2021 99.94652 99.84147 100.0516 99.78586 100.1072
## Feb 2021 99.94637 99.83902 100.0537 99.78218 100.1106
## Mar 2021 99.94627 99.83671 100.0558 99.77871 100.1138
## Apr 2021 99.94621 99.83451 100.0579 99.77538 100.1170
## May 2021 99.94616 99.83239 100.0599 99.77216 100.1202
## Jun 2021 99.94613 99.83033 100.0619 99.76903 100.1232
## Jul 2021 99.94611 99.82833 100.0639 99.76599 100.1262
## Aug 2021 99.94610 99.82638 100.0658 99.76301 100.1292
## Sep 2021 99.94609 99.82447 100.0677 99.76008 100.1321
## Oct 2021 99.94608 99.82259 100.0696 99.75721 100.1349
## Nov 2021 99.94608 99.82074 100.0714 99.75439 100.1378
## Dec 2021 99.94607 99.81892 100.0732 99.75161 100.1405
## Jan 2022 99.94607 99.81713 100.0750 99.74887 100.1433
## Feb 2022 99.94607 99.81536 100.0768 99.74617 100.1460
## Mar 2022 99.94607 99.81362 100.0785 99.74351 100.1486
## Apr 2022 99.94607 99.81191 100.0802 99.74088 100.1513
## May 2022 99.94607 99.81021 100.0819 99.73829 100.1538
## Jun 2022 99.94607 99.80854 100.0836 99.73573 100.1564
## Jul 2022 99.94607 99.80688 100.0853 99.73320 100.1589
## Aug 2022 99.94607 99.80525 100.0869 99.73070 100.1614
## Sep 2022 99.94607 99.80363 100.0885 99.72823 100.1639
## Oct 2022 99.94607 99.80203 100.0901 99.72579 100.1663
## Nov 2022 99.94607 99.80045 100.0917 99.72337 100.1688
## Dec 2022 99.94607 99.79889 100.0932 99.72098 100.1712
## Jan 2023 99.94607 99.79734 100.0948 99.71861 100.1735
## Feb 2023 99.94607 99.79581 100.0963 99.71627 100.1759
## Mar 2023 99.94607 99.79430 100.0978 99.71395 100.1782
## Apr 2023 99.94607 99.79280 100.0993 99.71166 100.1805
## May 2023 99.94607 99.79131 100.1008 99.70939 100.1827
## Jun 2023 99.94607 99.78984 100.1023 99.70714 100.1850
## Jul 2023 99.94607 99.78838 100.1038 99.70491 100.1872
## Aug 2023 99.94607 99.78694 100.1052 99.70270 100.1894
## Sep 2023 99.94607 99.78550 100.1066 99.70051 100.1916
## Oct 2023 99.94607 99.78409 100.1080 99.69834 100.1938
## Nov 2023 99.94607 99.78268 100.1095 99.69619 100.1959
## Dec 2023 99.94607 99.78128 100.1108 99.69405 100.1981
## Jan 2024 99.94607 99.77990 100.1122 99.69194 100.2002
## Feb 2024 99.94607 99.77853 100.1136 99.68984 100.2023
autoplot(pred_uk) # plot point forecasts with 80% and 95% prediction bands
# Residual diagnostics for the best differenced-series model, ARIMA(2,0,1)
acf2(resid(arima201$fit), 20)
## ACF PACF
## [1,] 0.00 0.00
## [2,] -0.01 -0.01
## [3,] 0.05 0.05
## [4,] -0.07 -0.07
## [5,] 0.04 0.04
## [6,] -0.05 -0.05
## [7,] 0.00 0.01
## [8,] 0.04 0.03
## [9,] -0.04 -0.03
## [10,] 0.02 0.01
## [11,] 0.12 0.12
## [12,] 0.04 0.04
## [13,] -0.03 -0.03
## [14,] -0.08 -0.09
## [15,] -0.09 -0.09
## [16,] -0.09 -0.09
## [17,] 0.05 0.07
## [18,] 0.02 0.02
## [19,] 0.04 0.04
## [20,] 0.03 0.02
# ACF/PACF of the SQUARED residuals — autocorrelation here would indicate
# conditional heteroskedasticity (ARCH effects).
acf2(resid(arima201$fit)^2, 20)
## ACF PACF
## [1,] 0.13 0.13
## [2,] 0.02 0.01
## [3,] -0.02 -0.03
## [4,] 0.08 0.09
## [5,] 0.06 0.04
## [6,] 0.09 0.08
## [7,] 0.05 0.04
## [8,] 0.06 0.04
## [9,] -0.03 -0.04
## [10,] -0.01 -0.02
## [11,] 0.05 0.04
## [12,] -0.05 -0.08
## [13,] 0.03 0.04
## [14,] 0.00 -0.01
## [15,] 0.01 0.01
## [16,] 0.00 0.01
## [17,] 0.07 0.07
## [18,] -0.05 -0.06
## [19,] 0.02 0.02
## [20,] -0.02 -0.01
The ACF and PACF of the residuals resemble white noise, so the mean equation is adequate. The ACF and PACF of the squared residuals show a spike at lag 1, so we should try GARCH(1,0).
# Fit an ARMA(2,1) mean equation with an ARCH(1) (= GARCH(1,0)) variance
# equation to the transformed UK series.
uk.garch10 = garchFit(~arma(2,1)+garch(1,0), difflog_df_uk)
##
## Series Initialization:
## ARMA Model: arma
## Formula Mean: ~ arma(2, 1)
## GARCH Model: garch
## Formula Variance: ~ garch(1, 0)
## ARMA Order: 2 1
## Max ARMA Order: 2
## GARCH Order: 1 0
## Max GARCH Order: 1
## Maximum Order: 2
## Conditional Dist: norm
## h.start: 3
## llh.start: 1
## Length of Series: 481
## Recursion Init: mci
## Series Scale: 0.000300023
##
## Parameter Initialization:
## Initial Parameters: $params
## Limits of Transformations: $U, $V
## Which Parameters are Fixed? $includes
## Parameter Matrix:
## U V params includes
## mu -0.13350687 0.1335069 0.01142066 TRUE
## ar1 -0.99999999 1.0000000 1.04911994 TRUE
## ar2 -0.99999999 1.0000000 -0.25107351 TRUE
## ma1 -0.99999999 1.0000000 -0.88575811 TRUE
## omega 0.00000100 100.0000000 0.10000000 TRUE
## alpha1 0.00000001 1.0000000 0.10000000 TRUE
## gamma1 -0.99999999 1.0000000 0.10000000 FALSE
## delta 0.00000000 2.0000000 2.00000000 FALSE
## skew 0.10000000 10.0000000 1.00000000 FALSE
## shape 1.00000000 10.0000000 4.00000000 FALSE
## Index List of Parameters to be Optimized:
## mu ar1 ar2 ma1 omega alpha1
## 1 2 3 4 5 6
## Persistence: 0.1
##
##
## --- START OF TRACE ---
## Selected Algorithm: nlminb
##
## R coded nlminb Solver:
##
## 0: 1445.5324: 0.0114207 1.00000 -0.251074 -0.885758 0.100000 0.100000
## 1: 678.62043: 0.0113671 1.00000 -0.201186 -0.811470 1.06118 0.360992
## 2: 674.16482: 0.0113601 0.973004 -0.227173 -0.831965 1.04238 0.346781
## 3: 663.35497: 0.0112907 0.955372 -0.237809 -0.832989 0.887481 0.230034
## 4: 662.39976: 0.0112778 0.967827 -0.224419 -0.818381 0.872082 0.217202
## 5: 661.68507: 0.0112626 0.954826 -0.236478 -0.828722 0.853845 0.203227
## 6: 661.14188: 0.0109367 0.943997 -0.237167 -0.783500 0.852676 0.170940
## 7: 660.67389: 0.0107223 0.923420 -0.216654 -0.811139 0.831145 0.166404
## 8: 660.30618: 0.0102705 0.980599 -0.234424 -0.830827 0.776598 0.202568
## 9: 660.25572: 0.0102555 0.973767 -0.238544 -0.834959 0.774180 0.197811
## 10: 660.16439: 0.0101865 0.974274 -0.231756 -0.831850 0.774384 0.192546
## 11: 660.09051: 0.00996364 0.965448 -0.230520 -0.832916 0.774968 0.183480
## 12: 660.01868: 0.00945036 0.966809 -0.238593 -0.818404 0.774015 0.180197
## 13: 659.91808: 0.00894305 0.967929 -0.228520 -0.830766 0.777548 0.172749
## 14: 659.91484: 0.00893622 0.967055 -0.229292 -0.831355 0.777384 0.172618
## 15: 659.91269: 0.00892086 0.967128 -0.228990 -0.830723 0.777078 0.172302
## 16: 659.90869: 0.00888605 0.966275 -0.229472 -0.831108 0.776702 0.172338
## 17: 659.70971: 0.00576592 0.968269 -0.215646 -0.844879 0.753777 0.201663
## 18: 659.60579: 0.00268530 1.00000 -0.234418 -0.871690 0.761722 0.182629
## 19: 659.51981: 0.000685040 1.00000 -0.222361 -0.861715 0.771888 0.176328
## 20: 659.45868: 0.00107268 0.981231 -0.225371 -0.851213 0.778678 0.169383
## 21: 659.44104: 0.00111661 0.990050 -0.226830 -0.859358 0.773545 0.174497
## 22: 659.43809: 0.00130483 0.992098 -0.225578 -0.857358 0.772724 0.175112
## 23: 659.42865: 0.00149677 0.991358 -0.226736 -0.858007 0.772122 0.175588
## 24: 659.42539: 0.00188294 0.991651 -0.227171 -0.857764 0.771246 0.176321
## 25: 659.42501: 0.00200534 0.991669 -0.227265 -0.858232 0.771919 0.176060
## 26: 659.42499: 0.00200395 0.991555 -0.227288 -0.858045 0.771725 0.175965
## 27: 659.42499: 0.00200231 0.991557 -0.227268 -0.858057 0.771760 0.176008
## 28: 659.42499: 0.00200251 0.991564 -0.227272 -0.858063 0.771759 0.176002
##
## Final Estimate of the Negative LLH:
## LLH: -3242.279 norm LLH: -6.740705
## mu ar1 ar2 ma1 omega
## 6.007991e-07 9.915644e-01 -2.272718e-01 -8.580633e-01 6.946895e-08
## alpha1
## 1.760016e-01
##
## R-optimhess Difference Approximated Hessian Matrix:
## mu ar1 ar2 ma1
## mu -3.015775e+11 -1.097299e+06 -6.994650e+05 6.485108e+05
## ar1 -1.097299e+06 -1.561654e+03 -1.392275e+03 -1.475015e+03
## ar2 -6.994650e+05 -1.392275e+03 -1.681839e+03 -1.369637e+03
## ma1 6.485108e+05 -1.475015e+03 -1.369637e+03 -1.726000e+03
## omega 1.106137e+12 -7.474029e+07 -1.067802e+08 -3.546032e+07
## alpha1 -3.171877e+05 3.296557e+01 5.149938e+01 2.303912e+01
## omega alpha1
## mu 1.106137e+12 -3.171877e+05
## ar1 -7.474029e+07 3.296557e+01
## ar2 -1.067802e+08 5.149938e+01
## ma1 -3.546032e+07 2.303912e+01
## omega -3.862692e+16 -1.809314e+09
## alpha1 -1.809314e+09 -3.179095e+02
## attr(,"time")
## Time difference of 0.01563287 secs
##
## --- END OF TRACE ---
##
##
## Time to Estimate Parameters:
## Time difference of 0.05250311 secs
# Coefficient estimates, standardised-residual tests, and information criteria.
summary(uk.garch10)
##
## Title:
## GARCH Modelling
##
## Call:
## garchFit(formula = ~arma(2, 1) + garch(1, 0), data = difflog_df_uk)
##
## Mean and Variance Equation:
## data ~ arma(2, 1) + garch(1, 0)
## <environment: 0x7fd9c92192f0>
## [data = difflog_df_uk]
##
## Conditional Distribution:
## norm
##
## Coefficient(s):
## mu ar1 ar2 ma1 omega
## 6.0080e-07 9.9156e-01 -2.2727e-01 -8.5806e-01 6.9469e-08
## alpha1
## 1.7600e-01
##
## Std. Errors:
## based on Hessian
##
## Error Analysis:
## Estimate Std. Error t value Pr(>|t|)
## mu 6.008e-07 1.853e-06 0.324 0.74571
## ar1 9.916e-01 6.843e-02 14.491 < 2e-16 ***
## ar2 -2.273e-01 4.830e-02 -4.706 2.53e-06 ***
## ma1 -8.581e-01 5.626e-02 -15.253 < 2e-16 ***
## omega 6.947e-08 5.960e-09 11.655 < 2e-16 ***
## alpha1 1.760e-01 6.596e-02 2.668 0.00763 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Log Likelihood:
## 3242.279 normalized: 6.740705
##
## Description:
## Wed Apr 22 22:50:29 2020 by user:
##
##
## Standardised Residuals Tests:
## Statistic p-Value
## Jarque-Bera Test R Chi^2 20.66601 3.254113e-05
## Shapiro-Wilk Test R W 0.9914875 0.007326074
## Ljung-Box Test R Q(10) 6.955381 0.7296504
## Ljung-Box Test R Q(15) 25.47321 0.04393815
## Ljung-Box Test R Q(20) 33.17176 0.03230477
## Ljung-Box Test R^2 Q(10) 9.504178 0.4850143
## Ljung-Box Test R^2 Q(15) 14.49048 0.4887014
## Ljung-Box Test R^2 Q(20) 21.94111 0.3437148
## LM Arch Test R TR^2 14.31159 0.2812549
##
## Information Criterion Statistics:
## AIC BIC SIC HQIC
## -13.45646 -13.40437 -13.45677 -13.43599
# Monthly U.S. series from the CTOT data, starting January 1980.
df_us <- ts(data[["United.States"]], start = c(1980, 1), frequency = 12)
plot.ts(df_us)
plot(decompose(df_us))
There is a clear trend: the series increases at first, then decreases, then increases again. The decomposition also reveals a seasonal component.
# Periodogram of the U.S. series to identify the predominant cycle.
sp <- spectrum(df_us, log = 'no', main = "U.S.: Periodogram")
# Use the full element name `freq`: the original `sp$fre` only worked through
# fragile `$` partial matching.
sp$freq[which.max(sp$spec)]
## [1] 0.02469136
1/sp$freq[which.max(sp$spec)]
## [1] 40.5
max(sp$spec) #estimate of spectral density at predominant period
## [1] 3.366589
# chi-squared(2) quantiles give a 95% CI for the spectral density estimate
U <- qchisq(.025, 2)
L <- qchisq(.975, 2)
2*max(sp$spec)/L #lower bound for spectral density
## [1] 0.912632
2*max(sp$spec)/U #upper bound for spectral density
## [1] 132.9732
The predominant frequency is near 0.02, so the period of the cycle is about 40.5 months. The estimated spectral density at the predominant frequency is 3.37, with a 95% confidence interval of [0.91, 132.97].
# make it stationary
difflog_df_us = diff(df_us) # first-order difference only (NOTE(review): no log transform is applied, despite the variable name)
plot.ts(difflog_df_us, main = "U.S.: Transformed CNEPI")
# ACF/PACF of the differenced series to pick candidate ARMA orders
acf2(difflog_df_us)
## ACF PACF
## [1,] 0.31 0.31
## [2,] 0.04 -0.06
## [3,] -0.03 -0.03
## [4,] -0.09 -0.07
## [5,] -0.14 -0.10
## [6,] -0.15 -0.09
## [7,] -0.05 0.02
## [8,] -0.02 -0.02
## [9,] -0.01 -0.02
## [10,] 0.10 0.09
## [11,] 0.08 0.00
## [12,] 0.00 -0.05
## [13,] -0.07 -0.06
## [14,] -0.07 -0.03
## [15,] -0.12 -0.09
## [16,] -0.13 -0.06
## [17,] -0.03 0.02
## [18,] 0.01 -0.01
## [19,] 0.04 0.01
## [20,] 0.12 0.08
## [21,] 0.06 -0.05
## [22,] 0.03 0.00
## [23,] -0.04 -0.04
## [24,] -0.05 -0.01
## [25,] -0.01 0.04
## [26,] -0.08 -0.06
## [27,] -0.02 0.02
## [28,] 0.01 -0.01
## [29,] 0.00 -0.04
## [30,] 0.07 0.06
## [31,] 0.07 0.00
## [32,] -0.02 -0.07
## [33,] -0.02 0.02
## [34,] -0.06 -0.05
## [35,] 0.01 0.06
## [36,] 0.02 0.03
## [37,] 0.02 0.01
## [38,] -0.03 -0.07
## [39,] -0.06 -0.06
## [40,] 0.01 0.04
## [41,] 0.04 0.02
## [42,] 0.04 0.02
## [43,] 0.01 0.01
## [44,] 0.01 0.01
## [45,] -0.02 -0.05
## [46,] -0.02 0.03
## [47,] 0.05 0.05
## [48,] 0.04 0.01
If we ignore the seasonality, based on the ACF and PACF, we should try ARIMA(1,0,1) and ARIMA(0,0,1)
# First candidate: ARIMA(1,0,1) on the differenced U.S. series
# (sarima also prints fitting trace and diagnostics).
arima101 = sarima(difflog_df_us,1,0,1)
## initial value -2.253352
## iter 2 value -2.264934
## iter 3 value -2.306690
## iter 4 value -2.306698
## iter 5 value -2.306700
## iter 6 value -2.306712
## iter 7 value -2.306714
## iter 8 value -2.306714
## iter 8 value -2.306714
## final value -2.306714
## converged
## initial value -2.307266
## iter 2 value -2.307267
## iter 3 value -2.307267
## iter 4 value -2.307268
## iter 5 value -2.307268
## iter 6 value -2.307268
## iter 6 value -2.307268
## iter 6 value -2.307268
## final value -2.307268
## converged
# Second candidate: ARIMA(0,0,1) on the differenced U.S. series.
arima001 = sarima(difflog_df_us,0,0,1)
## initial value -2.254087
## iter 2 value -2.306017
## iter 3 value -2.306028
## iter 4 value -2.306028
## iter 5 value -2.306028
## iter 6 value -2.306029
## iter 6 value -2.306029
## final value -2.306029
## converged
## initial value -2.305967
## iter 2 value -2.305967
## iter 2 value -2.305967
## iter 2 value -2.305967
## final value -2.305967
## converged
# Compare the two candidate models for the U.S. series by information criteria.
arima_table <- data.frame(
  Model = c("ARIMA(1,0,1)", "ARIMA(0,0,1)"),
  AIC   = c(arima101$AIC,  arima001$AIC),
  BIC   = c(arima101$BIC,  arima001$BIC),
  AICc  = c(arima101$AICc, arima001$AICc)
)
arima_table
## Model AIC BIC AICc
## 1 ARIMA(1,0,1) -1.760027 -1.725301 -1.759923
## 2 ARIMA(0,0,1) -1.761584 -1.735539 -1.761531
Comparing these two models, ARIMA(0, 0, 1) is better.
# Automatic non-seasonal selection on the differenced series.
auto.arima(difflog_df_us, seasonal = FALSE)
## Series: difflog_df_us
## ARIMA(0,0,1) with zero mean
##
## Coefficients:
## ma1
## 0.3160
## s.e. 0.0415
##
## sigma^2 estimated as 0.009952: log likelihood=426.65
## AIC=-849.3 AICc=-849.27 BIC=-840.95
The auto.arima() function also selects ARIMA(0,0,1), confirming that it is the better model.
# Allow seasonal terms on the level series; auto.arima() still returns a
# non-seasonal ARIMA(0,1,1) (see output below).
auto.arima(df_us, seasonal = TRUE)
## Series: df_us
## ARIMA(0,1,1)
##
## Coefficients:
## ma1
## 0.3160
## s.e. 0.0415
##
## sigma^2 estimated as 0.009972: log likelihood=426.65
## AIC=-849.3 AICc=-849.27 BIC=-840.95
Again the selected model has no seasonal terms, so the seasonal component is not very significant; we can use a plain ARIMA model instead of a SARIMA model.
# Since ARIMA(0,0,1) on the differenced series (= ARIMA(0,1,1) on the level
# series) is the best model, we use it to predict the future.
fit_us = Arima(df_us, order=c(0,1,1))
pred_us = forecast(fit_us, h=48)
pred_us # The last 2 columns are 95% prediction intervals lower bound and upper bound
## Point Forecast Lo 80 Hi 80 Lo 95 Hi 95
## Mar 2020 100.582 100.45398 100.7099 100.38624 100.7777
## Apr 2020 100.582 100.37044 100.7935 100.25846 100.9055
## May 2020 100.582 100.31158 100.8523 100.16845 100.9955
## Jun 2020 100.582 100.26342 100.9005 100.09479 101.0691
## Jul 2020 100.582 100.22164 100.9423 100.03089 101.1330
## Aug 2020 100.582 100.18422 100.9797 99.97367 101.1903
## Sep 2020 100.582 100.15003 101.0139 99.92138 101.2425
## Oct 2020 100.582 100.11836 101.0456 99.87294 101.2910
## Nov 2020 100.582 100.08872 101.0752 99.82761 101.3363
## Dec 2020 100.582 100.06076 101.1032 99.78485 101.3791
## Jan 2021 100.582 100.03422 101.1297 99.74426 101.4197
## Feb 2021 100.582 100.00891 101.1550 99.70556 101.4584
## Mar 2021 100.582 99.98468 101.1792 99.66849 101.4954
## Apr 2021 100.582 99.96139 101.2025 99.63288 101.5310
## May 2021 100.582 99.93894 101.2250 99.59855 101.5654
## Jun 2021 100.582 99.91725 101.2467 99.56537 101.5985
## Jul 2021 100.582 99.89625 101.2677 99.53325 101.6307
## Aug 2021 100.582 99.87587 101.2881 99.50208 101.6618
## Sep 2021 100.582 99.85606 101.3079 99.47179 101.6921
## Oct 2021 100.582 99.83678 101.3271 99.44230 101.7216
## Nov 2021 100.582 99.81798 101.3459 99.41356 101.7504
## Dec 2021 100.582 99.79964 101.3643 99.38551 101.7784
## Jan 2022 100.582 99.78172 101.3822 99.35809 101.8058
## Feb 2022 100.582 99.76419 101.3997 99.33128 101.8326
## Mar 2022 100.582 99.74703 101.4169 99.30504 101.8589
## Apr 2022 100.582 99.73021 101.4337 99.27932 101.8846
## May 2022 100.582 99.71372 101.4502 99.25410 101.9098
## Jun 2022 100.582 99.69754 101.4664 99.22935 101.9346
## Jul 2022 100.582 99.68164 101.4823 99.20504 101.9589
## Aug 2022 100.582 99.66603 101.4979 99.18116 101.9828
## Sep 2022 100.582 99.65067 101.5133 99.15768 102.0062
## Oct 2022 100.582 99.63556 101.5284 99.13457 102.0294
## Nov 2022 100.582 99.62070 101.5432 99.11183 102.0521
## Dec 2022 100.582 99.60605 101.5579 99.08944 102.0745
## Jan 2023 100.582 99.59163 101.5723 99.06738 102.0965
## Feb 2023 100.582 99.57741 101.5865 99.04563 102.1183
## Mar 2023 100.582 99.56339 101.6005 99.02419 102.1397
## Apr 2023 100.582 99.54956 101.6144 99.00304 102.1609
## May 2023 100.582 99.53591 101.6280 98.98217 102.1818
## Jun 2023 100.582 99.52244 101.6415 98.96157 102.2024
## Jul 2023 100.582 99.50914 101.6548 98.94122 102.2227
## Aug 2023 100.582 99.49600 101.6679 98.92113 102.2428
## Sep 2023 100.582 99.48302 101.6809 98.90128 102.2626
## Oct 2023 100.582 99.47019 101.6937 98.88165 102.2823
## Nov 2023 100.582 99.45751 101.7064 98.86225 102.3017
## Dec 2023 100.582 99.44496 101.7190 98.84307 102.3209
## Jan 2024 100.582 99.43256 101.7314 98.82410 102.3398
## Feb 2024 100.582 99.42028 101.7436 98.80533 102.3586
autoplot(pred_us) # plot point forecasts with 80% and 95% prediction bands
# Residual diagnostics for the best differenced-series model, ARIMA(0,0,1)
acf2(resid(arima001$fit), 20)
## ACF PACF
## [1,] 0.02 0.02
## [2,] 0.05 0.05
## [3,] -0.03 -0.03
## [4,] -0.06 -0.06
## [5,] -0.08 -0.08
## [6,] -0.12 -0.12
## [7,] -0.02 -0.01
## [8,] -0.01 0.00
## [9,] -0.03 -0.05
## [10,] 0.09 0.07
## [11,] 0.06 0.04
## [12,] 0.00 -0.02
## [13,] -0.06 -0.07
## [14,] -0.03 -0.03
## [15,] -0.08 -0.07
## [16,] -0.10 -0.08
## [17,] -0.01 0.00
## [18,] 0.01 0.00
## [19,] 0.00 -0.02
## [20,] 0.11 0.09
# Squared residuals: autocorrelation at early lags suggests ARCH effects.
acf2(resid(arima001$fit)^2, 20)
## ACF PACF
## [1,] 0.22 0.22
## [2,] 0.10 0.06
## [3,] 0.05 0.02
## [4,] 0.07 0.05
## [5,] 0.11 0.09
## [6,] 0.15 0.10
## [7,] 0.07 0.01
## [8,] 0.00 -0.04
## [9,] -0.02 -0.04
## [10,] 0.01 0.00
## [11,] -0.02 -0.05
## [12,] -0.03 -0.04
## [13,] -0.02 0.00
## [14,] -0.03 -0.01
## [15,] 0.00 0.03
## [16,] -0.04 -0.03
## [17,] -0.02 0.01
## [18,] -0.05 -0.03
## [19,] -0.06 -0.04
## [20,] 0.03 0.06
The ACF and PACF of the residuals resemble white noise, so the mean equation is adequate. The ACF and PACF of the squared residuals show autocorrelation at early lags, so we should try GARCH(1,0) and GARCH(1,1).
# MA(1) mean equation with a GARCH(1,1) variance equation on the
# differenced U.S. series.
us.garch11 = garchFit(~arma(0,1)+garch(1,1), difflog_df_us)
##
## Series Initialization:
## ARMA Model: arma
## Formula Mean: ~ arma(0, 1)
## GARCH Model: garch
## Formula Variance: ~ garch(1, 1)
## ARMA Order: 0 1
## Max ARMA Order: 1
## GARCH Order: 1 1
## Max GARCH Order: 1
## Maximum Order: 1
## Conditional Dist: norm
## h.start: 2
## llh.start: 1
## Length of Series: 481
## Recursion Init: mci
## Series Scale: 0.1050786
##
## Parameter Initialization:
## Initial Parameters: $params
## Limits of Transformations: $U, $V
## Which Parameters are Fixed? $includes
## Parameter Matrix:
## U V params includes
## mu -0.07189256 0.07189256 0.008642788 TRUE
## ma1 -0.99999999 0.99999999 0.315967121 TRUE
## omega 0.00000100 100.00000000 0.100000000 TRUE
## alpha1 0.00000001 0.99999999 0.100000000 TRUE
## gamma1 -0.99999999 0.99999999 0.100000000 FALSE
## beta1 0.00000001 0.99999999 0.800000000 TRUE
## delta 0.00000000 2.00000000 2.000000000 FALSE
## skew 0.10000000 10.00000000 1.000000000 FALSE
## shape 1.00000000 10.00000000 4.000000000 FALSE
## Index List of Parameters to be Optimized:
## mu ma1 omega alpha1 beta1
## 1 2 3 4 6
## Persistence: 0.9
##
##
## --- START OF TRACE ---
## Selected Algorithm: nlminb
##
## R coded nlminb Solver:
##
## 0: 627.37621: 0.00864279 0.315967 0.100000 0.100000 0.800000
## 1: 623.63898: 0.00864277 0.315392 0.0777472 0.101274 0.783528
## 2: 620.53715: 0.00864274 0.314665 0.0791916 0.128454 0.788736
## 3: 615.34721: 0.00864269 0.313075 0.0385094 0.160761 0.769435
## 4: 609.95947: 0.00864285 0.314167 0.0362602 0.216137 0.770504
## 5: 609.19537: 0.00864286 0.314179 0.0298528 0.215892 0.767204
## 6: 608.90753: 0.00864291 0.314380 0.0252005 0.221353 0.766504
## 7: 608.64472: 0.00864347 0.317518 0.0290149 0.234593 0.763621
## 8: 608.22237: 0.00864405 0.319228 0.0271299 0.241185 0.751048
## 9: 607.90149: 0.00864670 0.312404 0.0341959 0.261878 0.733520
## 10: 607.80436: 0.00864674 0.312412 0.0287369 0.262665 0.731828
## 11: 607.67334: 0.00864749 0.312989 0.0289542 0.265860 0.736591
## 12: 607.64226: 0.00864847 0.314509 0.0245741 0.268204 0.737686
## 13: 607.57996: 0.00865049 0.318448 0.0266808 0.271061 0.737572
## 14: 607.53674: 0.00865324 0.315677 0.0264849 0.273549 0.733810
## 15: 607.49806: 0.00866289 0.307495 0.0276830 0.279800 0.731919
## 16: 607.41478: 0.00867793 0.324913 0.0276422 0.288868 0.724086
## 17: 607.41121: 0.00867804 0.323956 0.0260997 0.289862 0.723606
## 18: 607.39202: 0.00868243 0.323339 0.0274241 0.291076 0.722877
## 19: 607.37631: 0.00869035 0.321404 0.0269675 0.293551 0.720276
## 20: 607.35003: 0.00874152 0.321036 0.0268222 0.298156 0.719838
## 21: 607.32558: 0.00874265 0.314584 0.0278634 0.302534 0.716626
## 22: 607.31785: 0.00879101 0.315966 0.0286173 0.305296 0.712518
## 23: 607.31710: 0.00879107 0.315783 0.0283024 0.305451 0.712483
## 24: 607.31647: 0.00879170 0.315628 0.0284760 0.305712 0.712649
## 25: 607.31565: 0.00879661 0.315484 0.0281770 0.305933 0.712573
## 26: 607.31483: 0.00880753 0.315313 0.0282749 0.306351 0.712651
## 27: 607.31386: 0.00883015 0.315129 0.0280780 0.306666 0.712469
## 28: 607.30247: 0.00953375 0.311425 0.0289684 0.310456 0.708414
## 29: 607.29609: 0.0102644 0.310262 0.0282783 0.314415 0.707986
## 30: 607.27641: 0.0128158 0.305706 0.0284555 0.314195 0.707561
## 31: 607.23654: 0.0198913 0.305344 0.0292754 0.318543 0.703007
## 32: 607.21252: 0.0253281 0.306774 0.0279637 0.312819 0.708688
## 33: 607.20834: 0.0261257 0.310675 0.0276170 0.311793 0.709956
## 34: 607.20825: 0.0258585 0.311010 0.0275207 0.311381 0.710410
## 35: 607.20825: 0.0258323 0.311024 0.0275285 0.311421 0.710374
## 36: 607.20825: 0.0258325 0.311021 0.0275280 0.311420 0.710375
##
## Final Estimate of the Negative LLH:
## LLH: -476.5073 norm LLH: -0.9906597
## mu ma1 omega alpha1 beta1
## 0.0027144390 0.3110213762 0.0003039502 0.3114196520 0.7103751210
##
## R-optimhess Difference Approximated Hessian Matrix:
## mu ma1 omega alpha1
## mu -65726.5836 -219.209479 -356597.11 -208.98512
## ma1 -219.2095 -441.300817 -13333.07 -26.45518
## omega -356597.1074 -13333.070187 -133424899.73 -332943.09833
## alpha1 -208.9851 -26.455180 -332943.10 -1681.60873
## beta1 -791.5421 3.443319 -635760.19 -2427.41427
## beta1
## mu -7.915421e+02
## ma1 3.443319e+00
## omega -6.357602e+05
## alpha1 -2.427414e+03
## beta1 -4.297641e+03
## attr(,"time")
## Time difference of 0.01077604 secs
##
## --- END OF TRACE ---
##
##
## Time to Estimate Parameters:
## Time difference of 0.04257584 secs
# Estimates, residual diagnostics, and information criteria for GARCH(1,1).
summary(us.garch11)
##
## Title:
## GARCH Modelling
##
## Call:
## garchFit(formula = ~arma(0, 1) + garch(1, 1), data = difflog_df_us)
##
## Mean and Variance Equation:
## data ~ arma(0, 1) + garch(1, 1)
## <environment: 0x7fd9c76c5a90>
## [data = difflog_df_us]
##
## Conditional Distribution:
## norm
##
## Coefficient(s):
## mu ma1 omega alpha1 beta1
## 0.00271444 0.31102138 0.00030395 0.31141965 0.71037512
##
## Std. Errors:
## based on Hessian
##
## Error Analysis:
## Estimate Std. Error t value Pr(>|t|)
## mu 0.0027144 0.0039512 0.687 0.4921
## ma1 0.3110214 0.0481527 6.459 1.05e-10 ***
## omega 0.0003040 0.0001669 1.821 0.0686 .
## alpha1 0.3114197 0.0587411 5.302 1.15e-07 ***
## beta1 0.7103751 0.0483309 14.698 < 2e-16 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Log Likelihood:
## 476.5073 normalized: 0.9906597
##
## Description:
## Wed Apr 22 22:50:31 2020 by user:
##
##
## Standardised Residuals Tests:
## Statistic p-Value
## Jarque-Bera Test R Chi^2 20.86165 2.950864e-05
## Shapiro-Wilk Test R W 0.9895454 0.001673182
## Ljung-Box Test R Q(10) 13.64167 0.1899724
## Ljung-Box Test R Q(15) 24.95106 0.05060454
## Ljung-Box Test R Q(20) 38.94699 0.006769295
## Ljung-Box Test R^2 Q(10) 7.380037 0.6891457
## Ljung-Box Test R^2 Q(15) 13.02762 0.6001644
## Ljung-Box Test R^2 Q(20) 16.09748 0.7105584
## LM Arch Test R TR^2 9.858589 0.6283651
##
## Information Criterion Statistics:
## AIC BIC SIC HQIC
## -1.960529 -1.917121 -1.960742 -1.943468
# Alternative variance equation: ARCH(1) (= GARCH(1,0)), for comparison
# against the GARCH(1,1) fit above.
us.garch10 = garchFit(~arma(0,1)+garch(1,0), difflog_df_us)
##
## Series Initialization:
## ARMA Model: arma
## Formula Mean: ~ arma(0, 1)
## GARCH Model: garch
## Formula Variance: ~ garch(1, 0)
## ARMA Order: 0 1
## Max ARMA Order: 1
## GARCH Order: 1 0
## Max GARCH Order: 1
## Maximum Order: 1
## Conditional Dist: norm
## h.start: 2
## llh.start: 1
## Length of Series: 481
## Recursion Init: mci
## Series Scale: 0.1050786
##
## Parameter Initialization:
## Initial Parameters: $params
## Limits of Transformations: $U, $V
## Which Parameters are Fixed? $includes
## Parameter Matrix:
## U V params includes
## mu -0.07189256 0.07189256 0.008642788 TRUE
## ma1 -0.99999999 0.99999999 0.315967121 TRUE
## omega 0.00000100 100.00000000 0.100000000 TRUE
## alpha1 0.00000001 0.99999999 0.100000000 TRUE
## gamma1 -0.99999999 0.99999999 0.100000000 FALSE
## delta 0.00000000 2.00000000 2.000000000 FALSE
## skew 0.10000000 10.00000000 1.000000000 FALSE
## shape 1.00000000 10.00000000 4.000000000 FALSE
## Index List of Parameters to be Optimized:
## mu ma1 omega alpha1
## 1 2 3 4
## Persistence: 0.1
##
##
## --- START OF TRACE ---
## Selected Algorithm: nlminb
##
## R coded nlminb Solver:
##
## 0: 1197.8317: 0.00864279 0.315967 0.100000 0.100000
## 1: 654.18876: 0.00864178 0.302705 1.02845 0.471232
## 2: 652.83706: 0.00863797 0.242037 1.01193 0.479204
## 3: 632.82287: 0.00863546 0.257425 0.773439 0.394551
## 4: 624.19041: 0.00863238 0.259005 0.590534 0.355262
## 5: 623.16298: 0.00862784 0.257788 0.525941 0.391314
## 6: 622.95172: 0.00861919 0.257970 0.543855 0.463088
## 7: 622.89810: 0.00849061 0.259995 0.477695 0.490885
## 8: 622.68915: 0.00833622 0.258348 0.507765 0.491506
## 9: 622.66424: 0.00808101 0.257742 0.506556 0.481189
## 10: 622.63093: 0.00706571 0.257965 0.506854 0.476104
## 11: 622.30402: -0.00839715 0.258008 0.503982 0.446050
## 12: 622.03220: -0.0246523 0.259181 0.504875 0.439360
## 13: 621.73558: -0.0513831 0.260130 0.501896 0.459738
## 14: 621.70359: -0.0545374 0.260899 0.506880 0.469934
## 15: 621.69994: -0.0541555 0.260795 0.503988 0.478161
## 16: 621.69970: -0.0535200 0.260467 0.504810 0.477537
## 17: 621.69970: -0.0534933 0.260610 0.504763 0.477502
## 18: 621.69970: -0.0535012 0.260567 0.504753 0.477508
## 19: 621.69970: -0.0535006 0.260568 0.504754 0.477508
##
## Final Estimate of the Negative LLH:
## LLH: -462.0158 norm LLH: -0.9605319
## mu ma1 omega alpha1
## -0.005621768 0.260567749 0.005573248 0.477508495
##
## R-optimhess Difference Approximated Hessian Matrix:
## mu ma1 omega alpha1
## mu -46171.758655 -317.421040 2888.9819 9.224856
## ma1 -317.421040 -437.344319 -610.6143 2.206603
## omega 2888.981878 -610.614289 -4521581.9411 -12133.103817
## alpha1 9.224856 2.206603 -12133.1038 -155.600202
## attr(,"time")
## Time difference of 0.005912066 secs
##
## --- END OF TRACE ---
##
##
## Time to Estimate Parameters:
## Time difference of 0.03112984 secs
# Estimates and diagnostics for GARCH(1,0); compare its AIC/BIC with GARCH(1,1).
summary(us.garch10)
##
## Title:
## GARCH Modelling
##
## Call:
## garchFit(formula = ~arma(0, 1) + garch(1, 0), data = difflog_df_us)
##
## Mean and Variance Equation:
## data ~ arma(0, 1) + garch(1, 0)
## <environment: 0x7fd9cbb12318>
## [data = difflog_df_us]
##
## Conditional Distribution:
## norm
##
## Coefficient(s):
## mu ma1 omega alpha1
## -0.0056218 0.2605677 0.0055732 0.4775085
##
## Std. Errors:
## based on Hessian
##
## Error Analysis:
## Estimate Std. Error t value Pr(>|t|)
## mu -0.005622 0.004666 -1.205 0.228
## ma1 0.260568 0.047949 5.434 5.50e-08 ***
## omega 0.005573 0.000529 10.536 < 2e-16 ***
## alpha1 0.477509 0.090164 5.296 1.18e-07 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Log Likelihood:
## 462.0158 normalized: 0.9605319
##
## Description:
## Wed Apr 22 22:50:31 2020 by user:
##
##
## Standardised Residuals Tests:
## Statistic p-Value
## Jarque-Bera Test R Chi^2 21.08861 2.634306e-05
## Shapiro-Wilk Test R W 0.9919224 0.01030616
## Ljung-Box Test R Q(10) 9.740902 0.4635115
## Ljung-Box Test R Q(15) 20.92794 0.1391477
## Ljung-Box Test R Q(20) 30.61945 0.06041904
## Ljung-Box Test R^2 Q(10) 19.53114 0.03401257
## Ljung-Box Test R^2 Q(15) 27.53572 0.02466273
## Ljung-Box Test R^2 Q(20) 32.03134 0.0429655
## LM Arch Test R TR^2 18.07213 0.1135178
##
## Information Criterion Statistics:
## AIC BIC SIC HQIC
## -1.904432 -1.869705 -1.904569 -1.890783
Based on the AIC and BIC, we should choose GARCH(1,1).